#ifdef CONFIG_VMX
-/* Check if the request is handled inside xen
- return value: 0 --not handled; 1 --handled */
+/*
+ * Check if the request is handled inside xen
+ * return value: 0 --not handled; 1 --handled
+ */
int vmx_io_intercept(ioreq_t *p, int type)
{
- struct vcpu *d = current;
- struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
+ struct vcpu *v = current;
+ struct vmx_io_handler *handler =
+ &(v->domain->arch.vmx_platform.vmx_io_handler);
int i;
- unsigned long addr, offset;
+ unsigned long addr, size;
+
for (i = 0; i < handler->num_slot; i++) {
if( type != handler->hdl_list[i].type)
continue;
- addr = handler->hdl_list[i].addr;
- offset = handler->hdl_list[i].offset;
+ addr = handler->hdl_list[i].addr;
+ size = handler->hdl_list[i].size;
if (p->addr >= addr &&
- p->addr < addr + offset)
+ p->addr < addr + size)
return handler->hdl_list[i].action(p);
}
return 0;
}
-int register_io_handler(unsigned long addr, unsigned long offset,
+int register_io_handler(unsigned long addr, unsigned long size,
intercept_action_t action, int type)
{
- struct vcpu *d = current;
- struct vmx_handler_t *handler = &(d->domain->arch.vmx_platform.vmx_handler);
+ struct vcpu *v = current;
+ struct vmx_io_handler *handler =
+ &(v->domain->arch.vmx_platform.vmx_io_handler);
int num = handler->num_slot;
-    if (num >= MAX_IO_HANDLER) {
-    }
+    if (num >= MAX_IO_HANDLER) {
+        printk("no extra space, register io interceptor failed!\n");
+        return 0;
+    }
handler->hdl_list[num].addr = addr;
- handler->hdl_list[num].offset = offset;
+ handler->hdl_list[num].size = size;
handler->hdl_list[num].action = action;
handler->hdl_list[num].type = type;
handler->num_slot++;
- return 1;
+ return 1;
}
-static void pit_cal_count(struct vmx_virpit_t *vpit)
+static void pit_cal_count(struct vmx_virpit *vpit)
{
u64 nsec_delta = (unsigned int)((NOW() - vpit->inject_point));
if (nsec_delta > vpit->period)
vpit->count = vpit->init_val - ((nsec_delta * PIT_FREQ / 1000000000ULL) % vpit->init_val );
}
-static void pit_latch_io(struct vmx_virpit_t *vpit)
+static void pit_latch_io(struct vmx_virpit *vpit)
{
pit_cal_count(vpit);
vpit->count_MSB_latched=1;
break;
default:
- BUG();
+ domain_crash_synchronous();
}
}
-static int pit_read_io(struct vmx_virpit_t *vpit)
+static int pit_read_io(struct vmx_virpit *vpit)
{
if(vpit->count_LSB_latched) {
/* Read Least Significant Byte */
/* the intercept action for PIT DM retval:0--not handled; 1--handled */
int intercept_pit_io(ioreq_t *p)
{
- struct vcpu *d = current;
- struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
+ struct vcpu *v = current;
+ struct vmx_virpit *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
if (p->size != 1 ||
p->pdata_valid ||
/* hooks function for the PIT initialization response iopacket */
static void pit_timer_fn(void *data)
{
- struct vmx_virpit_t *vpit = data;
+ struct vmx_virpit *vpit = data;
s_time_t next;
int missed_ticks;
/* Only some PIT operations such as load init counter need a hypervisor hook.
* leave all other operations in user space DM
*/
-void vmx_hooks_assist(struct vcpu *d)
+void vmx_hooks_assist(struct vcpu *v)
{
- vcpu_iodata_t * vio = get_vio(d->domain, d->vcpu_id);
+ vcpu_iodata_t *vio = get_vio(v->domain, v->vcpu_id);
ioreq_t *p = &vio->vp_ioreq;
- shared_iopage_t *sp = get_sp(d->domain);
+ shared_iopage_t *sp = get_sp(v->domain);
u64 *intr = &(sp->sp_global.pic_intr[0]);
- struct vmx_virpit_t *vpit = &(d->domain->arch.vmx_platform.vmx_pit);
+ struct vmx_virpit *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
int rw_mode, reinit = 0;
int oldvec = 0;
/* load init count*/
- if (p->state == STATE_IORESP_HOOK) {
+ if (p->state == STATE_IORESP_HOOK) {
/* set up actimer, handle re-init */
if ( active_ac_timer(&(vpit->pit_timer)) ) {
VMX_DBG_LOG(DBG_LEVEL_1, "VMX_PIT: guest reset PIT with channel %lx!\n", (unsigned long) ((p->u.data >> 24) & 0x3) );
oldvec = vpit->vector;
}
else
- init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, d->processor);
+ init_ac_timer(&vpit->pit_timer, pit_timer_fn, vpit, v->processor);
/* init count for this channel */
- vpit->init_val = (p->u.data & 0xFFFF) ;
+ vpit->init_val = (p->u.data & 0xFFFF) ;
/* frequency(ns) of pit */
- vpit->period = DIV_ROUND(((vpit->init_val) * 1000000000ULL), PIT_FREQ);
+ vpit->period = DIV_ROUND(((vpit->init_val) * 1000000000ULL), PIT_FREQ);
VMX_DBG_LOG(DBG_LEVEL_1,"VMX_PIT: guest set init pit freq:%u ns, initval:0x%x\n", vpit->period, vpit->init_val);
if (vpit->period < 900000) { /* < 0.9 ms */
printk("VMX_PIT: guest programmed too small an init_val: %x\n",
}
vpit->intr_bitmap = intr;
- vpit->v = d;
+ vpit->v = v;
vpit->scheduled = NOW() + vpit->period;
set_ac_timer(&vpit->pit_timer, vpit->scheduled);
#include <xen/errno.h>
#include <public/io/ioreq.h>
-#define MAX_IO_HANDLER 10
+#define MAX_IO_HANDLER 4
-typedef int (*intercept_action_t)(ioreq_t*);
+#define VMX_PORTIO 0
+#define VMX_MMIO 1
-enum {PORTIO, MMIO};
+typedef int (*intercept_action_t)(ioreq_t *);
-struct vmx_handler_t {
- int num_slot;
- struct {
- unsigned long addr;
- int type;
- unsigned long offset;
- intercept_action_t action;
- } hdl_list[MAX_IO_HANDLER];
+struct io_handler {
+ int type;
+ unsigned long addr;
+ unsigned long size;
+ intercept_action_t action;
+};
+
+struct vmx_io_handler {
+ int num_slot;
+ struct io_handler hdl_list[MAX_IO_HANDLER];
};
/* global io interception point in HV */
extern int vmx_io_intercept(ioreq_t *p, int type);
-extern int register_io_handler(unsigned long addr, unsigned long offset,
+extern int register_io_handler(unsigned long addr, unsigned long size,
intercept_action_t action, int type);
static inline int vmx_portio_intercept(ioreq_t *p)
{
- return vmx_io_intercept(p, PORTIO);
+ return vmx_io_intercept(p, VMX_PORTIO);
}
static inline int vmx_mmio_intercept(ioreq_t *p)
{
- return vmx_io_intercept(p, MMIO);
+ return vmx_io_intercept(p, VMX_MMIO);
}
-static inline int register_portio_handler(unsigned long addr,
- unsigned long offset,
+static inline int register_portio_handler(unsigned long addr,
+ unsigned long size,
intercept_action_t action)
{
- return register_io_handler(addr, offset, action, PORTIO);
+ return register_io_handler(addr, size, action, VMX_PORTIO);
}
-static inline int register_mmio_handler(unsigned long addr,
- unsigned long offset,
+static inline int register_mmio_handler(unsigned long addr,
+ unsigned long size,
intercept_action_t action)
{
- return register_io_handler(addr, offset, action, MMIO);
+ return register_io_handler(addr, size, action, VMX_MMIO);
}
#endif /* _VMX_INTERCEPT_H */
#if defined (__x86_64__)
extern void vmx_load_msrs(struct vcpu *n);
-void vmx_restore_msrs(struct vcpu *d);
+void vmx_restore_msrs(struct vcpu *v);
#else
#define vmx_load_msrs(_n) ((void)0)
#define vmx_restore_msrs(_v) ((void)0)
extern int vmcs_size;
-enum {
+enum {
VMX_INDEX_MSR_LSTAR = 0,
VMX_INDEX_MSR_STAR,
VMX_INDEX_MSR_CSTAR,
struct cpu_user_regs *inst_decoder_regs; /* current context */
};
-#define PC_DEBUG_PORT 0x80
+#define PC_DEBUG_PORT 0x80
struct arch_vmx_struct {
struct vmcs_struct *vmcs; /* VMCS pointer in virtual */
#define vmx_schedule_tail(next) \
(next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
-#define VMX_DOMAIN(ed) ((ed)->arch.arch_vmx.flags)
+#define VMX_DOMAIN(v) ((v)->arch.arch_vmx.flags)
#define ARCH_VMX_VMCS_LOADED 0 /* VMCS has been loaded and active */
#define ARCH_VMX_VMCS_LAUNCH 1 /* Needs VMCS launch */
extern unsigned int opt_vmx_debug_level;
#define VMX_DBG_LOG(level, _f, _a...) \
if ((level) & opt_vmx_debug_level) \
- printk("[VMX]" _f "\n", ## _a )
+ printk("[VMX:%d.%d] " _f "\n", \
+ current->domain->domain_id, current->vcpu_id, ## _a)
#else
#define VMX_DBG_LOG(level, _f, _a...)
#endif